Fit VBGE models to back-calculated length-at-age

Author

Max Lindmark, Jan Ohlberger, Anna Gårdmark

Published

April 12, 2024

Load libraries

library(tidyverse)
library(tidylog)
library(broom)
library(RColorBrewer)
library(viridis)
library(minpack.lm)
library(patchwork)
library(ggtext)
library(brms)
library(modelr)
library(tidybayes)
library(ggridges)
library(performance)

# devtools::install_github("seananderson/ggsidekick") ## not on CRAN 
library(ggsidekick); theme_set(theme_sleek())

# Load project helper functions (VBGF/Gompertz, nls_out, fit_nls, ...)
home <- here::here()
fxn <- list.files(paste0(home, "/R/functions"))
# lapply(), not sapply(): the files are sourced purely for their side effects,
# so sapply()'s type-unstable simplification buys nothing; invisible() hides
# the returned list. Arguments are in natural (data-first) order.
invisible(lapply(paste0(home, "/R/functions/", fxn), source))

Load cache

# qwraps2::lazyload_cache_dir(path = paste0(home, "/R/analyze-data/02-fit-vbge_cache/html"))

Read and trim data

# Read back-calculated length-at-age data from the public GitHub copy of the
# repo data file (the local-path read is kept commented out as an alternative)
d <- #read_csv(paste0(home, "/data/for-analysis/dat.csv")) %>% 
  read_csv("https://raw.githubusercontent.com/maxlindmark/perch-growth/master/data/for-analysis/dat.csv") %>% 
  filter(age_ring == "Y", # use only length-at-age by filtering on age_ring
         !area %in% c("VN", "TH")) # areas excluded from this analysis
Rows: 338460 Columns: 12
── Column specification ────────────────────────────────────────────────────────
Delimiter: ","
chr (6): age_ring, area, gear, ID, sex, keep
dbl (6): length_mm, age_bc, age_catch, catch_year, cohort, final_length

ℹ Use `spec()` to retrieve the full column specification for this data.
ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
filter: removed 43,886 rows (13%), 294,574 rows remaining
# Minimum number of observations per area and cohort:
# keep only area x cohort combinations with more than 100 rows
d$area_cohort <- as.factor(paste(d$area, d$cohort))

d <- d %>%
  group_by(area_cohort) %>% 
  filter(n() > 100) %>% 
  ungroup()
group_by: one grouping variable (area_cohort)
filter (grouped): removed 2,637 rows (1%), 291,937 rows remaining
ungroup: no grouping variables
# Minimum number of observations per area, cohort, and age:
# keep only area x cohort x age combinations with more than 10 rows
d$area_cohort_age <- as.factor(paste(d$area, d$cohort, d$age_bc))

d <- d %>%
  group_by(area_cohort_age) %>% 
  filter(n() > 10) %>% 
  ungroup()
group_by: one grouping variable (area_cohort_age)
filter (grouped): removed 3,521 rows (1%), 288,416 rows remaining
ungroup: no grouping variables
# Minimum number of cohorts in a given area: list the areas that contribute
# at least 10 distinct cohorts (used to subset d just below)
cnt <- d %>%
  group_by(area) %>%
  summarise(n = n_distinct(cohort)) %>%
  filter(n >= 10)
group_by: one grouping variable (area)
summarise: now 10 rows and 2 columns, ungrouped
filter: no rows removed
# Keep only areas that passed the cohort-count threshold
d <- d[d$area %in% cnt$area, ]

# Plot the cleaned length-at-age data, one panel per area
ggplot(d, aes(age_bc, length_mm)) +
  geom_jitter(size = 0.1, height = 0, alpha = 0.1) +
  scale_x_continuous(breaks = seq(20)) +
  # FIX: merged the two consecutive theme() calls into one
  theme(axis.text.x = element_text(angle = 0),
        axis.text = element_text(size = 12),
        axis.title = element_text(size = 15)) +
  labs(x = "Age", y = "Length (mm)") +
  guides(color = "none") + 
  # FIX: spell out `scales` in full (was partially matched as `scale`)
  facet_wrap(~area, scales = "free_x")

# Longitude and latitude attributes for each area
area <- c("BS", "BT", "FB", "FM", "HO", "JM", "MU", "RA", "SI_EK", "SI_HA", "TH")
nareas <- length(area)
lat <- c(60, 60.4, 60.3, 60.5, 63.7, 58, 59, 65.9, 57.3, 57.4, 56.1)
lon <- c(21.5, 18.1, 19.5, 18, 20.9, 16.8, 18.1, 22.3, 16.6, 16.7, 15.9)
# FIX: data.frame(cbind(...)) coerced every column to character (a matrix has
# a single type), which then had to be undone with the superseded mutate_at().
# Building the data.frame directly keeps lat/lon numeric from the start.
area_attr <- data.frame(area = area, lat = lat, lon = lon)
mutate_at: converted 'lat' from character to double (0 new NA)
           converted 'lon' from character to double (0 new NA)

Fit VBGE models

# Get individual growth parameters (functions: VBGF/Gompertz,nls_out,fit_nls)
# One nls fit per individual; fit_nls() takes the individual's lengths and
# ages with a minimum-age threshold (min_nage = 5), and nls_out() appears to
# return a one-row parameter summary per fit (k, linf + SEs) -- both are
# defined in R/functions and not visible here.
IVBG <- d %>%
  group_by(ID) %>%
  summarize(nls_out(fit_nls(length_mm, age_bc, min_nage = 5, model = "VBGF"))) %>% 
  ungroup()
group_by: one grouping variable (ID)
summarize: now 75,245 rows and 5 columns, ungrouped
ungroup: no grouping variables
# Test what's going on ... 

# expand_grid(k = c(0.1, 0.2),
#             linf = c(300, 400)) %>% 
#   crossing(age_bc = 1:100) %>% 
#   mutate(length_mm_pred = linf*(1-exp(-k*age_bc)),
#          group = paste0("k = ", k, " and Linf = ", linf)) %>% 
#   ggplot(aes(age_bc, length_mm_pred, color = group)) + 
#   geom_line()
# Drop individuals without usable parameter estimates
IVBG <- IVBG %>% drop_na(k) # The NAs are because the model didn't converge or because they were below the threshold age
drop_na: removed 51,638 rows (69%), 23,607 rows remaining
# 95% Wald confidence intervals for k and linf, plus the size-corrected
# growth coefficient A = k * linf * 0.65
# (the 0.65 constant presumably comes from the growth-rate parameterization
# of the VBGE -- TODO confirm source)
IVBG <- IVBG %>%
  mutate(k_lwr = k - 1.96*k_se,
         k_upr = k + 1.96*k_se,
         linf_lwr = linf - 1.96*linf_se,
         linf_upr = linf + 1.96*linf_se,
         A = k*linf*0.65,
         row_id = row_number())
mutate: new variable 'k_lwr' (double) with 23,605 unique values and 0% NA
        new variable 'k_upr' (double) with 23,605 unique values and 0% NA
        new variable 'linf_lwr' (double) with 23,605 unique values and 0% NA
        new variable 'linf_upr' (double) with 23,605 unique values and 0% NA
        new variable 'A' (double) with 23,605 unique values and 0% NA
        new variable 'row_id' (integer) with 23,607 unique values and 0% NA
# Add back cohort and area variables. IVBG holds one row per individual;
# joining against all rows of d fans each individual out to one row per
# back-calculated observation (this row count is used later for
# n_data_points, so the fan-out is intentional).
IVBG <- IVBG %>% 
  # FIX: make the join key explicit instead of relying on the implicit
  # natural-join message
  left_join(d %>% select(ID, area_cohort), by = "ID") %>% 
  separate(area_cohort, into = c("area", "cohort"), remove = TRUE, sep = " ") %>% 
  mutate(cohort = as.numeric(cohort))
select: dropped 12 variables (length_mm, age_bc, age_catch, age_ring, area, …)
Joining with `by = join_by(ID)`
left_join: added one column (area_cohort)
> rows only in x 0
> rows only in y (146,384)
> matched rows 142,032 (includes duplicates)
> =========
> rows total 142,032
mutate: converted 'cohort' from character to double (0 new NA)
# Compare how the means and quantiles differ depending on this filtering
# NOTE(review): IVBG_filter (dropping the 5% largest k_se) is created but
# does not appear to be used further down in this document -- either remove
# it or add the comparison this comment alludes to
IVBG_filter <- IVBG %>%
  filter(k_se < quantile(k_se, probs = 0.95))
filter: removed 7,102 rows (5%), 134,930 rows remaining
# Summarize growth coefficients by cohort and area.
# FIX: inside summarize(), a newly created column masks the input column of
# the same name. The original computed `k = quantile(k, 0.5)` first, so the
# later `k_lwr`/`k_upr` expressions saw the scalar median rather than the raw
# k values (making lwr = upr = median). The median is therefore computed
# under a temporary name and renamed afterwards.
VBG <- IVBG %>%
  filter(k_se < k) %>% # new! drop fits whose SE exceeds the estimate itself
  group_by(cohort, area) %>%
  summarize(k_med = quantile(k, prob = 0.5, na.rm = TRUE),
            A = quantile(A, prob = 0.5, na.rm = TRUE),
            linf = quantile(linf, prob = 0.5, na.rm = TRUE),
            k_lwr = quantile(k, prob = 0.05, na.rm = TRUE),
            k_upr = quantile(k, prob = 0.95, na.rm = TRUE)) %>% 
  ungroup() %>% 
  rename(k = k_med) # rename() keeps position, so column order is unchanged
filter: removed 4,201 rows (3%), 137,831 rows remaining
group_by: 2 grouping variables (cohort, area)
summarize: now 306 rows and 7 columns, one group variable remaining (cohort)
ungroup: no grouping variables

Calculate sample size

# Per-area sample sizes: number and span of cohorts, unique individuals, and
# total rows (one row per back-calculated length observation after the
# earlier fan-out join)
sample_size <- IVBG %>%
  group_by(area) %>% 
  summarise(n_cohort = length(unique(cohort)),
            min_cohort = min(cohort),
            max_cohort = max(cohort),
            n_individuals = length(unique(ID)),
            n_data_points = n())
group_by: one grouping variable (area)
summarise: now 10 rows and 6 columns, ungrouped
sample_size
# A tibble: 10 × 6
   area  n_cohort min_cohort max_cohort n_individuals n_data_points
   <chr>    <int>      <dbl>      <dbl>         <int>         <int>
 1 BS          17       1985       2001          1334          7688
 2 BT          22       1972       1995           961          5371
 3 FB          47       1969       2015          6040         37381
 4 FM          37       1963       1999          5055         32632
 5 HO          29       1982       2015          1151          6215
 6 JM          60       1953       2014          4866         28739
 7 MU          18       1981       1999          1105          6671
 8 RA          14       1985       2003           571          3115
 9 SI_EK       24       1968       2011           659          3577
10 SI_HA       38       1967       2013          1865         10643
# Grand totals across areas (the ungroup() is redundant -- summarise() above
# already returned an ungrouped tibble -- but harmless)
sample_size %>%
  ungroup() %>%
  summarise(sum_ind = sum(n_individuals), sum_n = sum(n_data_points))
ungroup: no grouping variables
summarise: now one row and 2 columns, ungrouped
# A tibble: 1 × 2
  sum_ind  sum_n
    <int>  <int>
1   23607 142032
write_csv(sample_size, paste0(home, "/output/sample_size.csv"))

Add GAM-predicted temperature to growth models

# GAM-predicted temperature per area and year; rename year -> cohort so it
# can be joined onto the cohort-level growth summaries
pred_temp <- read_csv(paste0(home, "/output/gam_predicted_temps.csv")) %>% 
  rename(cohort = year)
Rows: 380 Columns: 4
── Column specification ────────────────────────────────────────────────────────
Delimiter: ","
chr (2): area, model
dbl (2): year, temp

ℹ Use `spec()` to retrieve the full column specification for this data.
ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
rename: renamed one variable (cohort)
# Attach predicted temperature (and its model label) to each area x cohort
# growth estimate
VBG <- VBG %>%
  left_join(pred_temp, by = c("area", "cohort"))
left_join: added 2 columns (temp, model)
           > rows only in x     0
           > rows only in y  ( 74)
           > matched rows     306
           >                 =====
           > rows total       306
# Save data for map-plot
# NOTE(review): n() counts rows of IVBG, which after the earlier join holds
# one row per back-calculated observation -- so despite the comment below
# this is data points, not unique individuals; use n_distinct(ID) if
# individuals are intended. TODO confirm which is wanted for the map.
cohort_sample_size <- IVBG %>%
  group_by(area, cohort) %>% 
  summarise(n = n()) # individuals, not samples!
group_by: 2 grouping variables (area, cohort)
summarise: now 306 rows and 3 columns, one group variable remaining (area)
VBG <- left_join(VBG, cohort_sample_size, by = c("cohort", "area"))
left_join: added one column (n)
           > rows only in x     0
           > rows only in y  (  0)
           > matched rows     306
           >                 =====
           > rows total       306
write_csv(VBG, paste0(home, "/output/vbg.csv"))

# Calculate the plotting order (also used for map plot): areas ranked by
# mean predicted temperature, warmest first
# NOTE(review): `order` shadows base::order() for the rest of the session --
# a different name (e.g. area_order) would be safer
order <- VBG %>%
  ungroup() %>%
  group_by(area) %>%
  summarise(mean_temp = mean(temp)) %>%
  arrange(desc(mean_temp))
ungroup: no grouping variables
group_by: one grouping variable (area)
summarise: now 10 rows and 2 columns, ungrouped
order
# A tibble: 10 × 2
   area  mean_temp
   <chr>     <dbl>
 1 SI_HA     13.6 
 2 BT        13.1 
 3 FM         8.90
 4 JM         8.77
 5 BS         8.44
 6 FB         8.16
 7 SI_EK      8.03
 8 MU         6.85
 9 HO         6.58
10 RA         6.50
write_csv(order, paste0(home, "/output/ranked_temps.csv"))

nareas <- length(unique(order$area)) + 2 # to skip the brightest colors that are hard to read
# Interpolate RdYlBu to nareas colors, then drop the two palest mid-scale
# colors (positions 6 and 7), leaving one color per area
colors <- colorRampPalette(brewer.pal(name = "RdYlBu", n = 10))(nareas)[-c(6,7)]

Plot VBGE fits

# Sample 30 IDs per area and plot their data and VBGE fits
# (distinct() first so each individual is sampled with equal probability,
# regardless of how many observation rows it has)
set.seed(4)
ids <- IVBG %>% distinct(ID, .keep_all = TRUE) %>% group_by(area) %>% slice_sample(n = 30)
distinct: removed 118,425 rows (83%), 23,607 rows remaining
group_by: one grouping variable (area)
slice_sample (grouped): removed 23,307 rows (99%), 300 rows remaining
# One row per sampled individual with its fitted k and linf
IVBG2 <- IVBG %>%
  filter(ID %in% ids$ID) %>% 
  distinct(ID, .keep_all = TRUE) %>% 
  select(ID, k, linf)
filter: removed 140,311 rows (99%), 1,721 rows remaining
distinct: removed 1,421 rows (83%), 300 rows remaining
select: dropped 10 variables (k_se, linf_se, k_lwr, k_upr, linf_lwr, …)
# Raw observations for the sampled individuals, plus each individual's VBGF
# prediction: length = linf * (1 - exp(-k * age))
d2 <- d %>%
  ungroup() %>%
  filter(ID %in% ids$ID) %>%
  left_join(IVBG2, by = "ID") %>%
  mutate(length_mm_pred = linf*(1-exp(-k*age_bc)))
ungroup: no grouping variables
filter: removed 286,695 rows (99%), 1,721 rows remaining
left_join: added 2 columns (k, linf)
           > rows only in x       0
           > rows only in y  (    0)
           > matched rows     1,721
           >                 =======
           > rows total       1,721
mutate: new variable 'length_mm_pred' (double) with 1,714 unique values and 0% NA
# Jittered observed lengths with each sampled individual's fitted VBGF curve
# overlaid; panels ordered north to south by area latitude
fits_ind <- ggplot(d2, aes(age_bc, length_mm, group = ID, color = ID)) +
  geom_jitter(width = 0.3, height = 0, alpha = 0.5, size = 0.4) +
  geom_line(aes(age_bc, length_mm_pred, group = ID, color = ID),
            inherit.aes = FALSE, alpha = 0.8, linewidth = 0.3) + 
  guides(color = "none") +
  scale_color_viridis(discrete = TRUE, name = "Site", option = "cividis") +
  scale_x_continuous(breaks = seq(1, 20, by = 2)) + 
  labs(x = "Age (years)", y = "Length (mm)") +
  facet_wrap(~factor(area, levels = (arrange(area_attr, desc(lat)))$area), ncol = 5)

# Violin + boxplot of the growth coefficient A per site, sites ordered by
# mean temperature (warmest first, via `order`)
A <- IVBG %>% 
  ggplot(aes(factor(area, order$area), A, fill = factor(area, order$area))) + 
  coord_cartesian(ylim = c(22, 90)) +
  geom_violin(alpha = 0.8, color = NA) +
  geom_boxplot(outlier.color = NA, width = 0.2, alpha = 0.9, fill = NA, size = 0.4) +
  scale_fill_manual(values = colors, name = "Site") +
  scale_color_manual(values = colors, name = "Site") +
  guides(fill = "none", color = "none") +
  labs(x = "Site", y = "Growth coefficient (*A*)") + # markdown italics rendered by ggtext
  theme(axis.title.y = ggtext::element_markdown())

# Combine individual fits (top panel) with the A distributions (bottom)
fits_ind / A + plot_annotation(tag_levels = "A") #+ plot_layout(heights = c(1, 1.8))

ggsave(paste0(home, "/figures/vb_pars_fits.pdf" ), width = 17, height = 18, units = "cm")

# Supp plot for K and Linf
# k: violin + boxplot per site, same site ordering and colors as the A plot
k <- IVBG %>% 
  ggplot(aes(factor(area, order$area), k, fill = factor(area, order$area))) + 
  coord_cartesian(ylim = c(0, 0.7)) +
  geom_violin(alpha = 0.8, color = NA) +  
  geom_boxplot(outlier.color = NA, width = 0.2, alpha = 0.9, fill = NA, size = 0.4) +
  scale_fill_manual(values = colors, name = "Site") +
  guides(fill = "none", color = "none") +
  labs(x = "Site", y = expression(italic(k)))

# linf: extreme asymptotic lengths (>= 2300 mm or <= 130 mm) are filtered out
# before plotting
linf <- IVBG %>% 
  filter(linf < 2300) %>% 
  filter(linf > 130) %>% 
  ggplot(aes(factor(area, order$area), linf, fill = factor(area, order$area))) + 
  geom_violin(alpha = 0.8, color = NA) +  
  geom_boxplot(outlier.color = NA, width = 0.1, alpha = 0.9, fill = NA, size = 0.4) +
  coord_cartesian(ylim = c(0, 2000)) +
  scale_fill_manual(values = colors, name = "Site") +
  guides(fill = "none", color = "none") +
  labs(x = "", y = expression(paste(italic(L[infinity]), " [mm]")))
filter: removed 1,227 rows (1%), 140,805 rows remaining
filter: no rows removed
k / linf

ggsave(paste0(home, "/figures/supp/vb_k_linf.pdf" ), width = 17, height = 18, units = "cm")

Fit Models

Overall, \(A\) appears approximately linearly related to temperature in most cases, even when GAM smooths are overlaid on the scatter

# Model data: standardized (z-scored) temperature and its square, plus area
# as a factor for brms
dat <- VBG %>% 
  mutate(temp_sc = (temp - mean(temp)) / sd(temp),
         temp_sq_sc = temp_sc * temp_sc,
         area_f = as.factor(area))
mutate: new variable 'temp_sc' (double) with 294 unique values and 0% NA
        new variable 'temp_sq_sc' (double) with 294 unique values and 0% NA
        new variable 'area_f' (factor) with 10 unique values and 0% NA
# Non-scaled x-axis
# NOTE(review): `size = 0.6` is passed to ggplot() itself, where it has no
# effect -- point size is actually set in geom_point() below
scatter <- ggplot(dat, aes(temp, A, color = factor(area, order$area)), size = 0.6) +
  geom_point(size = 0.9) +
  scale_color_manual(values = colors, name = "Site") +
  scale_fill_manual(values = colors, name = "Site")
  
# Quick look with a low-rank GAM smooth per site
scatter +
  geom_smooth(method = "gam", formula = y~s(x, k=3), se = FALSE)

Now we’ll compare a set of candidate models with different temperature shapes and site effects

# Quadratic effect of temperature
# m1: fixed area intercepts + common linear and quadratic temperature terms.
# Student-t likelihood for robustness to outlying cohort estimates;
# normal(0, 5) prior on all slopes.
m1 <- brm(A ~ area_f + temp_sc + temp_sq_sc,
          data = dat,
          cores = 4,
          chains = 4,
          iter = 4000,
          family = student,
          prior = set_prior("normal(0,5)", class = "b"),
          save_pars = save_pars(all = TRUE), # required for moment_match in loo()
          seed = 99
          )

# Interaction between area and temperature
# m2: area-specific intercepts and slopes as fixed effects, no quadratic term
m2 <- brm(A ~ area_f * temp_sc,
          data = dat,
          cores = 4,
          chains = 4,
          iter = 4000,
          family = student,
          prior = set_prior("normal(0,5)", class = "b"),
          save_pars = save_pars(all = TRUE), # required for moment_match in loo()
          seed = 99
          )

# Interaction between area and temperature but common squared term
# m3: as m2 plus one shared quadratic temperature term
m3 <- brm(A ~ area_f * temp_sc + temp_sq_sc,
          data = dat,
          cores = 4,
          chains = 4,
          iter = 4000,
          family = student,
          prior = set_prior("normal(0,5)", class = "b"),
          save_pars = save_pars(all = TRUE), # required for moment_match in loo()
          seed = 99
          )

# Quadratic effect of temperature, full random 
# m4: common quadratic temperature curve with correlated random intercepts,
# slopes, and curvatures by area (the model used for the main figures below).
# NOTE(review): the fitted model reports 2 divergent transitions after
# warmup -- consider control = list(adapt_delta = 0.95) as used for m5.
m4 <- brm(A ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f),
          data = dat,
          cores = 4,
          chains = 4,
          iter = 4000,
          family = student,
          prior = set_prior("normal(0,5)", class = "b"),
          save_pars = save_pars(all = TRUE), # required for moment_match in loo()
          seed = 99
          )

# random intercept and slope?
# m5: linear temperature effect with correlated random intercept + slope by
# area; adapt_delta raised to 0.95 to suppress divergences
m5 <- brm(A ~ temp_sc + (1 + temp_sc | area_f),
          data = dat,
          cores = 4,
          chains = 4,
          iter = 4000,
          family = student,
          prior = set_prior("normal(0,5)", class = "b"),
          save_pars = save_pars(all = TRUE), # required for moment_match in loo()
          control = list(adapt_delta = 0.95),
          seed = 99
          )

Compare all models with loo

# PSIS-LOO comparison of all five models; moment_match = TRUE re-weights
# problematic draws without full refits (this is why every model was fit
# with save_pars(all = TRUE))
loo(m1, m2, m3, m4, m5,
    moment_match = TRUE)
Output of model 'm1':

Computed from 8000 by 306 log-likelihood matrix.

         Estimate   SE
elpd_loo   -886.9 16.6
p_loo        13.8  0.9
looic      1773.7 33.1
------
MCSE of elpd_loo is 0.0.
MCSE and ESS estimates assume MCMC draws (r_eff in [0.3, 1.7]).

All Pareto k estimates are good (k < 0.7).
See help('pareto-k-diagnostic') for details.

Output of model 'm2':

Computed from 8000 by 306 log-likelihood matrix.

         Estimate   SE
elpd_loo   -880.8 16.7
p_loo        20.5  1.7
looic      1761.6 33.4
------
MCSE of elpd_loo is 0.1.
MCSE and ESS estimates assume MCMC draws (r_eff in [0.3, 1.7]).

All Pareto k estimates are good (k < 0.7).
See help('pareto-k-diagnostic') for details.

Output of model 'm3':

Computed from 8000 by 306 log-likelihood matrix.

         Estimate   SE
elpd_loo   -881.6 16.7
p_loo        21.5  1.8
looic      1763.2 33.5
------
MCSE of elpd_loo is 0.1.
MCSE and ESS estimates assume MCMC draws (r_eff in [0.4, 1.8]).

All Pareto k estimates are good (k < 0.7).
See help('pareto-k-diagnostic') for details.

Output of model 'm4':

Computed from 8000 by 306 log-likelihood matrix.

         Estimate   SE
elpd_loo   -881.7 16.7
p_loo        21.2  2.2
looic      1763.3 33.5
------
MCSE of elpd_loo is 0.1.
MCSE and ESS estimates assume MCMC draws (r_eff in [0.2, 1.0]).

All Pareto k estimates are good (k < 0.7).
See help('pareto-k-diagnostic') for details.

Output of model 'm5':

Computed from 8000 by 306 log-likelihood matrix.

         Estimate   SE
elpd_loo   -881.0 16.7
p_loo        17.8  1.6
looic      1762.1 33.4
------
MCSE of elpd_loo is 0.1.
MCSE and ESS estimates assume MCMC draws (r_eff in [0.5, 1.5]).

All Pareto k estimates are good (k < 0.7).
See help('pareto-k-diagnostic') for details.

Model comparisons:
   elpd_diff se_diff
m2  0.0       0.0   
m5 -0.2       2.1   
m3 -0.8       0.5   
m4 -0.9       2.4   
m1 -6.1       4.1   

Plot the favorite model

performance::r2_bayes(m4)
# Bayesian R2 with Compatibility Interval

  Conditional R2: 0.270 (95% CI [0.200, 0.338])
     Marginal R2: 0.286 (95% CI [0.071, 0.514])
prior_summary(m4)
                   prior     class       coef  group resp dpar nlpar lb ub
             normal(0,5)         b                                        
             normal(0,5)         b    temp_sc                             
             normal(0,5)         b temp_sq_sc                             
 student_t(3, 43.4, 4.4) Intercept                                        
    lkj_corr_cholesky(1)         L                                        
    lkj_corr_cholesky(1)         L            area_f                      
           gamma(2, 0.1)        nu                                    1   
    student_t(3, 0, 4.4)        sd                                    0   
    student_t(3, 0, 4.4)        sd            area_f                  0   
    student_t(3, 0, 4.4)        sd  Intercept area_f                  0   
    student_t(3, 0, 4.4)        sd    temp_sc area_f                  0   
    student_t(3, 0, 4.4)        sd temp_sq_sc area_f                  0   
    student_t(3, 0, 4.4)     sigma                                    0   
       source
         user
 (vectorized)
 (vectorized)
      default
      default
 (vectorized)
      default
      default
 (vectorized)
 (vectorized)
 (vectorized)
 (vectorized)
      default
m4
Warning: There were 2 divergent transitions after warmup. Increasing
adapt_delta above 0.8 may help. See
http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
 Family: student 
  Links: mu = identity; sigma = identity; nu = identity 
Formula: A ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f) 
   Data: dat (Number of observations: 306) 
  Draws: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
         total post-warmup draws = 8000

Group-Level Effects: 
~area_f (Number of levels: 10) 
                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)                 1.80      0.79     0.55     3.61 1.00     1956
sd(temp_sc)                   2.20      0.93     0.72     4.41 1.00     1065
sd(temp_sq_sc)                1.16      0.80     0.07     3.12 1.00     1511
cor(Intercept,temp_sc)        0.46      0.34    -0.35     0.93 1.00     2947
cor(Intercept,temp_sq_sc)     0.19      0.44    -0.70     0.91 1.00     4581
cor(temp_sc,temp_sq_sc)       0.45      0.42    -0.59     0.96 1.00     3515
                          Tail_ESS
sd(Intercept)                 2951
sd(temp_sc)                    625
sd(temp_sq_sc)                 682
cor(Intercept,temp_sc)        3097
cor(Intercept,temp_sq_sc)     4149
cor(temp_sc,temp_sq_sc)       4534

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept     44.00      0.71    42.52    45.38 1.00     3503     4669
temp_sc        3.06      1.01     1.08     5.21 1.00     1597      589
temp_sq_sc     0.00      0.65    -1.29     1.49 1.00     1512      554

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     3.40      0.27     2.88     3.95 1.00     2647      706
nu        5.91      2.47     3.14    11.95 1.00     3846     4230

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
# Predict with the main model
# Area-specific expected values over each area's observed temperature range
# (100 grid points per area), back-transforming temp_sc to degrees C for
# plotting
m4_pred <- dat %>%
  group_by(area) %>% 
  data_grid(temp_sc = seq_range(temp_sc, n = 100)) %>% 
  ungroup() %>% 
  mutate(area_f = as.factor(area),
         temp = (temp_sc * sd(VBG$temp)) + mean(VBG$temp),
         temp_sq_sc = temp_sc*temp_sc) %>% 
  add_epred_draws(m4)

# Global prediction?
# Population-level expectation only: re_formula = NA drops the area random
# effects from the prediction
m4_pred_glob <- dat %>%
  data_grid(temp_sc = seq_range(temp_sc, n = 100)) %>% 
  mutate(temp = (temp_sc * sd(VBG$temp)) + mean(VBG$temp),
         temp_sq_sc = temp_sc*temp_sc) %>% 
  add_epred_draws(m4, re_formula = NA)

# Plot area specific predictions as facets
# Central posterior line (.width = 0) over a 95% ribbon per area, drawn on
# top of the raw A-vs-temperature scatter; the legend is placed inside the
# panel near the bottom
p0 <- scatter + 
  # scale_color_viridis(discrete = TRUE) +
  # scale_fill_viridis(discrete = TRUE) +
  stat_lineribbon(data = m4_pred,
                  aes(temp, y = .epred,
                      fill = factor(area, order$area)),
                  alpha = 0.1, .width = 0.95, linewidth = 0.9) +
  stat_lineribbon(data = m4_pred,
                  aes(temp, y = .epred,
                      fill = factor(area, order$area)),
                  alpha = 1, .width = 0, linewidth = 0.9) +
  # stat_lineribbon(data = m4_pred_glob,
  #                 aes(temp, y = .epred), inherit.aes = FALSE, color = "grey30", fill = "grey30",
  #                 alpha = 1, .width = 0, linewidth = 0.9) + 
  guides(fill = "none", color = guide_legend(nrow = 1, reverse = TRUE,
                                             title.position = "top", title.hjust = 0.5,
                                             override.aes = list(fill = NA),
                                             position = "inside")) + 
  
  theme(legend.position.inside = c(0.5, 0.05),
        axis.title.y = ggtext::element_markdown()) + 
  labs(x = "Mean site temperature [°C]", y = "Size-corrected growth coefficient (*A*)",
       color = "")

p0

ggsave(paste0(home, "/figures/a_conditional.pdf" ), width = 17, height = 17, units = "cm")

# Facet
# scatter + 
#   stat_lineribbon(data = m5_pred,
#                   aes(temp, y = .epred,
#                       fill = factor(area, order$area)),
#                   alpha = 0.1, .width = 0.95) + 
#   stat_lineribbon(data = m5_pred,
#                   aes(temp, y = .epred,
#                       fill = factor(area, order$area)),
#                   alpha = 1, .width = 0) + 
#   facet_wrap(~area)
# 
# ggsave(paste0(home, "/figures/supp/A_by_area.pdf" ), width = 17, height = 17, units = "cm")


# Look at the slopes and intercepts by area
# Mean GAM-predicted temperature per area, used as the x-axis below
mean_temps <- pred_temp %>%
  group_by(area) %>%
  summarise(mean_temp = mean(temp))
 
# p1 <- m4 %>%
#   spread_draws(b_temp_sc, r_area_f[temp_sc,]) %>%
#   median_qi(slope = b_temp_sc + r_area_f, .width = c(.9)) %>%
#   mutate(area = temp_sc) %>% 
#   left_join(mean_temps, by = "area") %>% 
#   ggplot(aes(y = slope, x = mean_temp, ymin = .lower, ymax = .upper,
#              color = factor(area, order$area),
#              fill = factor(area, order$area))) +
#   geom_hline(yintercept = 0, linetype = 2, alpha = 0.5, linewidth = 0.8) + 
#   geom_smooth(method = "lm", inherit.aes = FALSE,
#               aes(mean_temp, slope), alpha = 0.15, color = "grey") +  
#   geom_point() +
#   geom_errorbar(alpha = 0.3) +
#   scale_color_manual(values = colors, name = "Site") + 
#   scale_fill_manual(values = colors, name = "Site") + 
#   labs(y = "Site specific slope", x = "Mean site temperature (°C)") + 
#   guides(fill = "none",
#          color = "none") +
#   theme(axis.title.y = ggtext::element_markdown())
# 
# p2: area-level intercepts (population intercept + area random effect) with
# 90% intervals, plotted against mean site temperature. The spread_draws()
# index column is named `Intercept` here but actually holds the area label,
# hence the mutate(area = Intercept) relabelling before the join.
p2 <- m4 %>%
  spread_draws(b_Intercept, r_area_f[Intercept,]) %>%
  median_qi(intercept = b_Intercept + r_area_f, .width = c(.9)) %>%
  mutate(area = Intercept) %>%
  left_join(mean_temps, by = "area") %>%
  ggplot(aes(y = intercept, x = mean_temp, ymin = .lower, ymax = .upper,
             color = factor(area, order$area),
             fill = factor(area, order$area))) +
  geom_smooth(method = "lm", inherit.aes = FALSE, linetype = 2,
              aes(mean_temp, intercept), alpha = 0.15, color = "grey") +
  geom_point(size = 2) +
  geom_errorbar(alpha = 0.3) +
  scale_color_manual(values = colors, name = "Site") +
  scale_fill_manual(values = colors, name = "Site") +
  labs(y = "Site-specific intercept", x = "Mean site temperature (°C)") +
  guides(fill = "none",
         color = "none") +
  theme(axis.title.y = ggtext::element_markdown())

p2 

ggsave(paste0(home, "/figures/random_intercepts.pdf" ), width = 11, height = 11, units = "cm")

 
# p0 / ((p2 + p1) + plot_layout(axes = "collect")) +
#   plot_annotation(tag_level = "A") + 
#   plot_layout(heights = c(2, 1))
# 
# ggsave(paste0(home, "/figures/a_conditional.pdf" ), width = 17, height = 20, units = "cm")

# Check significance of slopes?!
# get_variables(m5)
# t_inter <- m5 %>%
#   spread_draws(b_Intercept, r_area_f[Intercept,]) %>%
#   median_qi(intercept = b_Intercept + r_area_f, .width = c(.9)) %>%
#   mutate(area = Intercept) %>%
#   left_join(mean_temps, by = "area")
# 
# t_slope <- m5 %>%
#   spread_draws(b_temp_sc, r_area_f[temp_sc,]) %>%
#   median_qi(slope = b_temp_sc + r_area_f, .width = c(.9)) %>%
#   mutate(area = temp_sc) %>%
#   left_join(mean_temps, by = "area")
# 
# tidy(lm(intercept ~ mean_temp, data = t_inter))
# tidy(lm(slope ~ mean_temp, data = t_slope))

Plot conceptual model

get_variables(m4)
 [1] "b_Intercept"                       "b_temp_sc"                        
 [3] "b_temp_sq_sc"                      "sd_area_f__Intercept"             
 [5] "sd_area_f__temp_sc"                "sd_area_f__temp_sq_sc"            
 [7] "cor_area_f__Intercept__temp_sc"    "cor_area_f__Intercept__temp_sq_sc"
 [9] "cor_area_f__temp_sc__temp_sq_sc"   "sigma"                            
[11] "nu"                                "Intercept"                        
[13] "r_area_f[BS,Intercept]"            "r_area_f[BT,Intercept]"           
[15] "r_area_f[FB,Intercept]"            "r_area_f[FM,Intercept]"           
[17] "r_area_f[HO,Intercept]"            "r_area_f[JM,Intercept]"           
[19] "r_area_f[MU,Intercept]"            "r_area_f[RA,Intercept]"           
[21] "r_area_f[SI_EK,Intercept]"         "r_area_f[SI_HA,Intercept]"        
[23] "r_area_f[BS,temp_sc]"              "r_area_f[BT,temp_sc]"             
[25] "r_area_f[FB,temp_sc]"              "r_area_f[FM,temp_sc]"             
[27] "r_area_f[HO,temp_sc]"              "r_area_f[JM,temp_sc]"             
[29] "r_area_f[MU,temp_sc]"              "r_area_f[RA,temp_sc]"             
[31] "r_area_f[SI_EK,temp_sc]"           "r_area_f[SI_HA,temp_sc]"          
[33] "r_area_f[BS,temp_sq_sc]"           "r_area_f[BT,temp_sq_sc]"          
[35] "r_area_f[FB,temp_sq_sc]"           "r_area_f[FM,temp_sq_sc]"          
[37] "r_area_f[HO,temp_sq_sc]"           "r_area_f[JM,temp_sq_sc]"          
[39] "r_area_f[MU,temp_sq_sc]"           "r_area_f[RA,temp_sq_sc]"          
[41] "r_area_f[SI_EK,temp_sq_sc]"        "r_area_f[SI_HA,temp_sq_sc]"       
[43] "lprior"                            "lp__"                             
[45] "z_1[1,1]"                          "z_1[2,1]"                         
[47] "z_1[3,1]"                          "z_1[1,2]"                         
[49] "z_1[2,2]"                          "z_1[3,2]"                         
[51] "z_1[1,3]"                          "z_1[2,3]"                         
[53] "z_1[3,3]"                          "z_1[1,4]"                         
[55] "z_1[2,4]"                          "z_1[3,4]"                         
[57] "z_1[1,5]"                          "z_1[2,5]"                         
[59] "z_1[3,5]"                          "z_1[1,6]"                         
[61] "z_1[2,6]"                          "z_1[3,6]"                         
[63] "z_1[1,7]"                          "z_1[2,7]"                         
[65] "z_1[3,7]"                          "z_1[1,8]"                         
[67] "z_1[2,8]"                          "z_1[3,8]"                         
[69] "z_1[1,9]"                          "z_1[2,9]"                         
[71] "z_1[3,9]"                          "z_1[1,10]"                        
[73] "z_1[2,10]"                         "z_1[3,10]"                        
[75] "L_1[1,1]"                          "L_1[2,1]"                         
[77] "L_1[3,1]"                          "L_1[1,2]"                         
[79] "L_1[2,2]"                          "L_1[3,2]"                         
[81] "L_1[1,3]"                          "L_1[2,3]"                         
[83] "L_1[3,3]"                          "accept_stat__"                    
[85] "stepsize__"                        "treedepth__"                      
[87] "n_leapfrog__"                      "divergent__"                      
[89] "energy__"                         
# Posterior-mean intercept and temperature coefficients for area FM
# (population-level effect + FM's random effect), used as the template curve
# for the conceptual figures below.
# NOTE(review): `par` shadows graphics::par() for the rest of the session --
# a different name would be safer
par <- m4 %>%
  spread_draws(b_Intercept, b_temp_sc, b_temp_sq_sc,
               `r_area_f[FM,Intercept]`, `r_area_f[FM,temp_sc]`, `r_area_f[FM,temp_sq_sc]`) %>% 
  ungroup() %>% 
  mutate(intercept = b_Intercept + `r_area_f[FM,Intercept]`,
         b1 = b_temp_sc + `r_area_f[FM,temp_sc]`,
         b2 = b_temp_sq_sc + `r_area_f[FM,temp_sq_sc]`) %>% 
  summarise(intercept = mean(intercept),
            b1 = mean(b1),
            b2 = mean(b2))
ungroup: no grouping variables
mutate: new variable 'intercept' (double) with 7,885 unique values and 0% NA
        new variable 'b1' (double) with 7,885 unique values and 0% NA
        new variable 'b2' (double) with 7,885 unique values and 0% NA
summarise: now one row and 3 columns, ungrouped
# No adaptation: all populations share one thermal performance curve, each
# occupying a different 10-degree window along it ("Cold" population here)
no_adapt_1 <- tibble(temp = seq(5, 15, length.out = 500)) %>% 
  # FIX: misplaced parenthesis -- the original computed temp - (mean/sd)
  # instead of the z-score (temp - mean)/sd used to fit the model (cf. dat)
  mutate(temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
         temp_sc_sq = temp_sc*temp_sc) %>% 
  mutate(growth_rate = par$intercept + temp_sc*(par$b1 * 3) + temp_sc_sq*(par$b2*0.08),
         pop = "Cold")
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
# "Medium" population: same shared curve, warmer window, small vertical offset
no_adapt_2 <- tibble(temp = seq(10, 20, length.out = 500)) %>% 
  # FIX: misplaced parenthesis -- z-score is (temp - mean)/sd, not
  # temp - (mean/sd) (cf. the scaling used for dat)
  mutate(temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
         temp_sc_sq = temp_sc*temp_sc) %>% 
  mutate(growth_rate = par$intercept + temp_sc*(par$b1 * 3) + temp_sc_sq*(par$b2*0.08),
         pop = "Medium",
         growth_rate = growth_rate + 0.5)
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
# Warm-site curve over 15-25 °C; vertically offset by +1 for display.
# FIX: parentheses corrected so temp is standardized as (x - mean) / sd
# (original precedence computed temp - mean/sd).
no_adapt_3 <- tibble(temp = seq(15, 25, length.out = 500)) %>% 
  mutate(temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
         temp_sc_sq = temp_sc*temp_sc) %>% 
  mutate(growth_rate = par$intercept + temp_sc*(par$b1 * 3) + temp_sc_sq*(par$b2*0.08),
         pop = "Warm",
         growth_rate = growth_rate + 1)
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
# Stack the three site curves and draw the "no adaptation" panel:
# all sites share one reaction norm, so curves differ only by offset.
no_adapt <- bind_rows(no_adapt_1, no_adapt_2, no_adapt_3)

p1 <- ggplot(no_adapt, aes(temp, growth_rate, color = pop)) +
  geom_line(linewidth = 1.2, alpha = 0.8) +
  labs(tag = "No adaptation", y = "Increasing growth rate →", x = "Increasing temperature →", color = "Site") +
  # Legend placed inside the panel; axis text/ticks hidden since the
  # figure is conceptual (axes indicate direction only)
  guides(color = guide_legend(position = "inside", title.position = "top", title.hjust = 0.5)) +
  theme(legend.position.inside = c(0.5, 0.1),
        legend.direction = "horizontal",
        plot.tag = element_text(size = 9),
        plot.tag.position = c(0.23, 0.94),
        axis.text.x = element_blank(),
        axis.ticks.x = element_blank(),
        axis.text.y = element_blank(),
        axis.ticks.y = element_blank())

p1

# Now do "local adaptation"
# Cold-site curve; steeper slope / stronger curvature than the
# no-adaptation case to illustrate site-specific reaction norms.
# FIX: parentheses corrected so temp is standardized as (x - mean) / sd
# (original precedence computed temp - mean/sd).
adapt_1 <- tibble(temp = seq(5, 15, length.out = 500)) %>% 
  mutate(temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
         temp_sc_sq = temp_sc*temp_sc) %>% 
  mutate(growth_rate = par$intercept + temp_sc*(par$b1 * 4) + temp_sc_sq*(par$b2*0.18),
         pop = "Cold")
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
# Medium-site curve; intercept scaled by 0.976, offset +1 for display.
# FIX: parentheses corrected so temp is standardized as (x - mean) / sd
# (original precedence computed temp - mean/sd).
adapt_2 <- tibble(temp = seq(10, 20, length.out = 500)) %>% 
  mutate(temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
         temp_sc_sq = temp_sc*temp_sc) %>% 
  #mutate(growth_rate = par$intercept*0.95 + temp_sc*(par$b1 * 3.5) + temp_sc_sq*(par$b2*0.1),
  mutate(growth_rate = par$intercept*0.976 + temp_sc*(par$b1 * 3) + temp_sc_sq*(par$b2*0.08),
         pop = "Medium",
         growth_rate = growth_rate + 1)
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
# Warm-site curve; intercept scaled by 0.715, offset +2 for display.
# FIX: parentheses corrected so temp is standardized as (x - mean) / sd
# (original precedence computed temp - mean/sd).
adapt_3 <- tibble(temp = seq(15, 25, length.out = 500)) %>% 
  mutate(temp_sc = (temp - mean(VBG$temp)) / sd(VBG$temp),
         temp_sc_sq = temp_sc*temp_sc) %>% 
  mutate(growth_rate = par$intercept*0.715 + temp_sc*(par$b1 * 5) + temp_sc_sq*(par$b2*0.11),
         pop = "Warm",
         growth_rate = growth_rate + 2)
mutate: new variable 'temp_sc' (double) with 500 unique values and 0% NA
        new variable 'temp_sc_sq' (double) with 500 unique values and 0% NA
mutate: new variable 'growth_rate' (double) with 500 unique values and 0% NA
        new variable 'pop' (character) with one unique value and 0% NA
# ggplot(adapt_3, aes(temp, growth_rate, color = pop)) + 
#   geom_line(linewidth = 1.2, alpha = 0.8) 

# Stack the three adapted site curves and draw the "local adaptation"
# panel; legend suppressed because p1 already carries it.
adapt <- bind_rows(adapt_1, adapt_2, adapt_3)

p2 <- ggplot(adapt, aes(temp, growth_rate, color = pop)) +
  geom_line(linewidth = 1.2, alpha = 0.8) +
  labs(tag = "Local adaptation", y = "Increasing growth rate →", x = "Increasing temperature →") +
  guides(color = "none") +
  theme(plot.tag = element_text(size = 9),
        plot.tag.position = c(0.2, 0.94),
        axis.text.x = element_blank(),
        axis.ticks.x = element_blank(),
        axis.text.y = element_blank(),
        axis.ticks.y = element_blank())

# Combine panels; `&` applies the shared color scale to both
p1 + p2 + 
  plot_layout(axes = "collect") &
  scale_color_manual(values = colors[c(10, 5, 1)])

ggsave(paste0(home, "/figures/concept.pdf"), width = 17, height = 8, units = "cm", device = cairo_pdf)

Supplementary plot

# Plot prior vs posterior
# Refit with sampling on prior

# NOTE(review): ggsave() with no plot argument saves the last displayed
# plot — here that is the concept figure above, not a pp_check, despite
# the file name. Confirm whether a pp_check() call is missing before this.
ggsave(paste0(home, "/figures/supp/pp_check.pdf" ), width = 11, height = 11, units = "cm")
Warning in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing growth rate →' in 'mbcsToSbcs': dot
substituted for <e2>
Warning in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing growth rate →' in 'mbcsToSbcs': dot
substituted for <86>
Warning in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing growth rate →' in 'mbcsToSbcs': dot
substituted for <92>
Warning in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing temperature →' in 'mbcsToSbcs': dot
substituted for <e2>
Warning in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing temperature →' in 'mbcsToSbcs': dot
substituted for <86>
Warning in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing temperature →' in 'mbcsToSbcs': dot
substituted for <92>
Warning in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing growth rate →' in 'mbcsToSbcs': dot
substituted for <e2>
Warning in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing growth rate →' in 'mbcsToSbcs': dot
substituted for <86>
Warning in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing growth rate →' in 'mbcsToSbcs': dot
substituted for <92>
Warning in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing temperature →' in 'mbcsToSbcs': dot
substituted for <e2>
Warning in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing temperature →' in 'mbcsToSbcs': dot
substituted for <86>
Warning in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing temperature →' in 'mbcsToSbcs': dot
substituted for <92>
Warning in grid.Call.graphics(C_text, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing growth rate →' in 'mbcsToSbcs': dot
substituted for <e2>
Warning in grid.Call.graphics(C_text, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing growth rate →' in 'mbcsToSbcs': dot
substituted for <86>
Warning in grid.Call.graphics(C_text, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing growth rate →' in 'mbcsToSbcs': dot
substituted for <92>
Warning in grid.Call.graphics(C_text, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing growth rate →' in 'mbcsToSbcs': dot
substituted for <e2>
Warning in grid.Call.graphics(C_text, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing growth rate →' in 'mbcsToSbcs': dot
substituted for <86>
Warning in grid.Call.graphics(C_text, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing growth rate →' in 'mbcsToSbcs': dot
substituted for <92>
Warning in grid.Call.graphics(C_text, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing temperature →' in 'mbcsToSbcs': dot
substituted for <e2>
Warning in grid.Call.graphics(C_text, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing temperature →' in 'mbcsToSbcs': dot
substituted for <86>
Warning in grid.Call.graphics(C_text, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing temperature →' in 'mbcsToSbcs': dot
substituted for <92>
Warning in grid.Call.graphics(C_text, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing temperature →' in 'mbcsToSbcs': dot
substituted for <e2>
Warning in grid.Call.graphics(C_text, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing temperature →' in 'mbcsToSbcs': dot
substituted for <86>
Warning in grid.Call.graphics(C_text, as.graphicsAnnot(x$label), x$x, x$y, :
conversion failure on 'Increasing temperature →' in 'mbcsToSbcs': dot
substituted for <92>
# Refit of the growth-coefficient model with sample_prior = TRUE so prior
# draws can be compared against the posterior (supplementary figure).
# FIX: added seed = 99 for reproducibility, consistent with m4k and m4l
# below; removed the trailing empty argument before the closing ")".
m4p <- brm(A ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f),
           data = dat,
           cores = 4,
           chains = 4,
           iter = 4000,
           seed = 99,
           family = student,
           prior = set_prior("normal(0,5)", class = "b"),
           sample_prior = TRUE,
           save_pars = save_pars(all = TRUE),
           control = list(adapt_delta = 0.95))
Compiling Stan program...
Trying to compile a simple C file
Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
using C compiler: ‘Apple clang version 15.0.0 (clang-1500.1.0.2.5)’
using SDK: ‘MacOSX14.2.sdk’
clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DUSE_STANC3 -DSTRICT_R_HEADERS  -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION  -D_HAS_AUTO_PTR_ETC=0  -include '/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/opt/R/arm64/include    -fPIC  -falign-functions=64 -Wall -g -O2  -c foo.c -o foo.o
In file included from <built-in>:1:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp:22:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/Core:19:
/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:679:10: fatal error: 'cmath' file not found
#include <cmath>
         ^~~~~~~
1 error generated.
make: *** [foo.o] Error 1
Start sampling
# Inspect the priors actually applied (user-set "b" prior vs brms defaults)
prior_summary(m4p)
                   prior     class       coef  group resp dpar nlpar lb ub
             normal(0,5)         b                                        
             normal(0,5)         b    temp_sc                             
             normal(0,5)         b temp_sq_sc                             
 student_t(3, 43.4, 4.4) Intercept                                        
    lkj_corr_cholesky(1)         L                                        
    lkj_corr_cholesky(1)         L            area_f                      
           gamma(2, 0.1)        nu                                    1   
    student_t(3, 0, 4.4)        sd                                    0   
    student_t(3, 0, 4.4)        sd            area_f                  0   
    student_t(3, 0, 4.4)        sd  Intercept area_f                  0   
    student_t(3, 0, 4.4)        sd    temp_sc area_f                  0   
    student_t(3, 0, 4.4)        sd temp_sq_sc area_f                  0   
    student_t(3, 0, 4.4)     sigma                                    0   
       source
         user
 (vectorized)
 (vectorized)
      default
      default
 (vectorized)
      default
      default
 (vectorized)
 (vectorized)
 (vectorized)
 (vectorized)
      default
# List all draw variable names in the fit, including the prior_* draws
# produced by sample_prior = TRUE
get_variables(m4p)
 [1] "b_Intercept"                       "b_temp_sc"                        
 [3] "b_temp_sq_sc"                      "sd_area_f__Intercept"             
 [5] "sd_area_f__temp_sc"                "sd_area_f__temp_sq_sc"            
 [7] "cor_area_f__Intercept__temp_sc"    "cor_area_f__Intercept__temp_sq_sc"
 [9] "cor_area_f__temp_sc__temp_sq_sc"   "sigma"                            
[11] "nu"                                "Intercept"                        
[13] "r_area_f[BS,Intercept]"            "r_area_f[BT,Intercept]"           
[15] "r_area_f[FB,Intercept]"            "r_area_f[FM,Intercept]"           
[17] "r_area_f[HO,Intercept]"            "r_area_f[JM,Intercept]"           
[19] "r_area_f[MU,Intercept]"            "r_area_f[RA,Intercept]"           
[21] "r_area_f[SI_EK,Intercept]"         "r_area_f[SI_HA,Intercept]"        
[23] "r_area_f[BS,temp_sc]"              "r_area_f[BT,temp_sc]"             
[25] "r_area_f[FB,temp_sc]"              "r_area_f[FM,temp_sc]"             
[27] "r_area_f[HO,temp_sc]"              "r_area_f[JM,temp_sc]"             
[29] "r_area_f[MU,temp_sc]"              "r_area_f[RA,temp_sc]"             
[31] "r_area_f[SI_EK,temp_sc]"           "r_area_f[SI_HA,temp_sc]"          
[33] "r_area_f[BS,temp_sq_sc]"           "r_area_f[BT,temp_sq_sc]"          
[35] "r_area_f[FB,temp_sq_sc]"           "r_area_f[FM,temp_sq_sc]"          
[37] "r_area_f[HO,temp_sq_sc]"           "r_area_f[JM,temp_sq_sc]"          
[39] "r_area_f[MU,temp_sq_sc]"           "r_area_f[RA,temp_sq_sc]"          
[41] "r_area_f[SI_EK,temp_sq_sc]"        "r_area_f[SI_HA,temp_sq_sc]"       
[43] "prior_Intercept"                   "prior_b"                          
[45] "prior_sigma"                       "prior_nu"                         
[47] "prior_sd_area_f"                   "prior_cor_area_f"                 
[49] "lprior"                            "lp__"                             
[51] "z_1[1,1]"                          "z_1[2,1]"                         
[53] "z_1[3,1]"                          "z_1[1,2]"                         
[55] "z_1[2,2]"                          "z_1[3,2]"                         
[57] "z_1[1,3]"                          "z_1[2,3]"                         
[59] "z_1[3,3]"                          "z_1[1,4]"                         
[61] "z_1[2,4]"                          "z_1[3,4]"                         
[63] "z_1[1,5]"                          "z_1[2,5]"                         
[65] "z_1[3,5]"                          "z_1[1,6]"                         
[67] "z_1[2,6]"                          "z_1[3,6]"                         
[69] "z_1[1,7]"                          "z_1[2,7]"                         
[71] "z_1[3,7]"                          "z_1[1,8]"                         
[73] "z_1[2,8]"                          "z_1[3,8]"                         
[75] "z_1[1,9]"                          "z_1[2,9]"                         
[77] "z_1[3,9]"                          "z_1[1,10]"                        
[79] "z_1[2,10]"                         "z_1[3,10]"                        
[81] "L_1[1,1]"                          "L_1[2,1]"                         
[83] "L_1[3,1]"                          "L_1[1,2]"                         
[85] "L_1[2,2]"                          "L_1[3,2]"                         
[87] "L_1[1,3]"                          "L_1[2,3]"                         
[89] "L_1[3,3]"                          "accept_stat__"                    
[91] "stepsize__"                        "treedepth__"                      
[93] "n_leapfrog__"                      "divergent__"                      
[95] "energy__"                         
# Trace and density plots. NOTE(review): this plots m4, while the rest of
# this section works with the prior-sampling refit m4p — confirm which
# model is intended here.
plot(m4)

# Posterior draws of the main parameters, long format for plotting
post_params <- c("b_Intercept", "b_temp_sc", "sigma", "nu")
post_draws <- get_post_draws(model = m4p, params = post_params)
Warning: Dropping 'draws_df' class as required metadata was removed.
pivot_longer: reorganized (b_Intercept, b_temp_sc, sigma, nu) into (parameter, value) [was 8000x4, now 32000x2]
mutate: new variable 'type' (character) with one unique value and 0% NA
# Matching prior draws (available because m4p used sample_prior = TRUE)
prior_params <- c("prior_Intercept", "prior_b", "prior_sigma", "prior_nu")
prior_draws <- get_prior_draws(model = m4p, params = prior_params)
Warning: Dropping 'draws_df' class as required metadata was removed.
pivot_longer: reorganized (prior_Intercept, prior_b, prior_sigma, prior_nu) into (parameter, value) [was 8000x4, now 32000x2]
mutate: changed 32,000 values (100%) of 'parameter' (0 new NA)
mutate: new variable 'type' (character) with one unique value and 0% NA
# Harmonize parameter labels so prior and posterior draws line up when
# faceted together (e.g. "b_Intercept" and "prior_Intercept" both become
# "Intercept"-family labels).
dist <- bind_rows(
  post_draws %>%
    mutate(parameter = case_when(parameter == "b_Intercept" ~ "Intercept",
                                 parameter == "b_temp_sc" ~ "temp_sc",
                                 TRUE ~ parameter)),
  prior_draws %>%
    mutate(parameter = if_else(parameter == "b", "temp_sc", parameter))
)
mutate: changed 16,000 values (50%) of 'parameter' (0 new NA)
mutate: changed 8,000 values (25%) of 'parameter' (0 new NA)
# Overlay prior and posterior densities, one facet per parameter
plot_prior_post(dist, type) +
  facet_wrap(~parameter, scales = "free") +
  labs(x = "Value", y = "Density") +
  guides(fill = guide_legend(position = "inside")) +
  theme(legend.position.inside = c(0.93, 0.97))
Warning: A numeric `legend.position` argument in `theme()` was deprecated in ggplot2
3.5.0.
ℹ Please use the `legend.position.inside` argument of `theme()` instead.

# Save the prior-vs-posterior comparison figure
ggsave(paste0(home, "/figures/supp/brms_prior.pdf" ), width = 17, height = 17, units = "cm")

# Posterior predictive
# Density overlay of observed A vs posterior predictive draws
pp_check(m4p) +
  labs(x = "Growth coefficient (*A*)") +
  guides(color = guide_legend(position = "inside")) +
  theme(legend.position.inside = c(0.8, 0.8))
Using 10 posterior draws for ppc type 'dens_overlay' by default.

Fit models of K and Linf to temperature

# Quadratic effect of temperature
# Model of VBGE k vs temperature: quadratic fixed effect with correlated
# random intercept and slopes by area; Student-t likelihood for
# robustness to outlying k estimates. adapt_delta = 0.999 forces smaller
# sampler steps (helps avoid divergent transitions).
# FIX: removed the trailing empty argument before the closing ")".
m4k <- brm(k ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f),
           data = dat,
           cores = 4,
           chains = 4,
           iter = 4000,
           seed = 99,
           family = student,
           prior = set_prior("normal(0,5)", class = "b"),
           save_pars = save_pars(all = TRUE),
           control = list(adapt_delta = 0.999))
Compiling Stan program...
Trying to compile a simple C file
Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
using C compiler: ‘Apple clang version 15.0.0 (clang-1500.1.0.2.5)’
using SDK: ‘MacOSX14.2.sdk’
clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DUSE_STANC3 -DSTRICT_R_HEADERS  -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION  -D_HAS_AUTO_PTR_ETC=0  -include '/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/opt/R/arm64/include    -fPIC  -falign-functions=64 -Wall -g -O2  -c foo.c -o foo.o
In file included from <built-in>:1:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp:22:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/Core:19:
/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:679:10: fatal error: 'cmath' file not found
#include <cmath>
         ^~~~~~~
1 error generated.
make: *** [foo.o] Error 1
Start sampling
prior_summary(m4k)
                  prior     class       coef  group resp dpar nlpar lb ub
            normal(0,5)         b                                        
            normal(0,5)         b    temp_sc                             
            normal(0,5)         b temp_sq_sc                             
 student_t(3, 0.2, 2.5) Intercept                                        
   lkj_corr_cholesky(1)         L                                        
   lkj_corr_cholesky(1)         L            area_f                      
          gamma(2, 0.1)        nu                                    1   
   student_t(3, 0, 2.5)        sd                                    0   
   student_t(3, 0, 2.5)        sd            area_f                  0   
   student_t(3, 0, 2.5)        sd  Intercept area_f                  0   
   student_t(3, 0, 2.5)        sd    temp_sc area_f                  0   
   student_t(3, 0, 2.5)        sd temp_sq_sc area_f                  0   
   student_t(3, 0, 2.5)     sigma                                    0   
       source
         user
 (vectorized)
 (vectorized)
      default
      default
 (vectorized)
      default
      default
 (vectorized)
 (vectorized)
 (vectorized)
 (vectorized)
      default
# Same model structure for L-infinity vs temperature. NOTE(review): unlike
# m4k, no control = list(adapt_delta = ...) is set here — presumably the
# default sufficed for this response; verify there were no divergences.
m4l <- brm(linf ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f),
           data = dat,
           cores = 4,
           chains = 4,
           iter = 4000,
           seed = 99,
           family = student,
           prior = set_prior("normal(0,5)", class = "b"),
           save_pars = save_pars(all = TRUE)
           )
Compiling Stan program...
Trying to compile a simple C file
Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
using C compiler: ‘Apple clang version 15.0.0 (clang-1500.1.0.2.5)’
using SDK: ‘MacOSX14.2.sdk’
clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG   -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/Rcpp/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/unsupported"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/src/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppParallel/include/"  -I"/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG  -DBOOST_DISABLE_ASSERTS  -DBOOST_PENDING_INTEGER_LOG2_HPP  -DSTAN_THREADS  -DUSE_STANC3 -DSTRICT_R_HEADERS  -DBOOST_PHOENIX_NO_VARIADIC_EXPRESSION  -D_HAS_AUTO_PTR_ETC=0  -include '/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp'  -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1   -I/opt/R/arm64/include    -fPIC  -falign-functions=64 -Wall -g -O2  -c foo.c -o foo.o
In file included from <built-in>:1:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/StanHeaders/include/stan/math/prim/fun/Eigen.hpp:22:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
In file included from /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/Core:19:
/Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:679:10: fatal error: 'cmath' file not found
#include <cmath>
         ^~~~~~~
1 error generated.
make: *** [foo.o] Error 1
Start sampling
prior_summary(m4l)
                   prior     class       coef  group resp dpar nlpar lb ub
             normal(0,5)         b                                        
             normal(0,5)         b    temp_sc                             
             normal(0,5)         b temp_sq_sc                             
 student_t(3, 340.9, 80) Intercept                                        
    lkj_corr_cholesky(1)         L                                        
    lkj_corr_cholesky(1)         L            area_f                      
           gamma(2, 0.1)        nu                                    1   
     student_t(3, 0, 80)        sd                                    0   
     student_t(3, 0, 80)        sd            area_f                  0   
     student_t(3, 0, 80)        sd  Intercept area_f                  0   
     student_t(3, 0, 80)        sd    temp_sc area_f                  0   
     student_t(3, 0, 80)        sd temp_sq_sc area_f                  0   
     student_t(3, 0, 80)     sigma                                    0   
       source
         user
 (vectorized)
 (vectorized)
      default
      default
 (vectorized)
      default
      default
 (vectorized)
 (vectorized)
 (vectorized)
 (vectorized)
      default

Plot these fits

# PP_check
# Posterior predictive check for the k model; element_markdown() renders
# the markdown italics in the axis label
p1 <- pp_check(m4k) +
  labs(x = "*k*") +
  guides(color = guide_legend(position = "inside")) +
  theme(legend.position.inside = c(0.8, 0.8),
        axis.title.x = element_markdown())
Using 10 posterior draws for ppc type 'dens_overlay' by default.
# Posterior predictive check for the L-infinity model; x-axis clipped to
# 0-1000 mm without dropping data (coord_cartesian zooms, not filters)
p2 <- pp_check(m4l) +
  labs(x = expression(paste(italic(L[infinity]), " [mm]"))) +
  guides(color = guide_legend(position = "inside")) +
  theme(legend.position.inside = c(0.8, 0.8)) +
  coord_cartesian(xlim = c(0, 1000))
Using 10 posterior draws for ppc type 'dens_overlay' by default.
Coordinate system already present. Adding new coordinate system, which will
replace the existing one.
# Combine the two pp_check panels side by side and save
p1 + p2

ggsave(paste0(home, "/figures/supp/k_linf_pp_check.pdf" ), width = 17, height = 11, units = "cm")


# Conditional predictions
# Build a per-area grid of 100 scaled temperatures spanning each area's
# observed range, back-transform to degrees C for plotting, and attach
# expected-value posterior draws from the k model.
m4k_pred <- dat %>%
  group_by(area) %>%
  data_grid(temp_sc = seq_range(temp_sc, n = 100)) %>%
  ungroup() %>%
  mutate(area_f = as.factor(area),
         temp = (temp_sc * sd(VBG$temp)) + mean(VBG$temp),
         temp_sq_sc = temp_sc^2) %>%
  add_epred_draws(m4k)
group_by: one grouping variable (area)
ungroup: no grouping variables
mutate: new variable 'area_f' (factor) with 10 unique values and 0% NA
        new variable 'temp_sq_sc' (double) with 999 unique values and 0% NA
        new variable 'temp' (double) with 999 unique values and 0% NA
# Same prediction grid as for k, but with expected-value draws from the
# L-infinity model.
m4l_pred <- dat %>%
  group_by(area) %>%
  data_grid(temp_sc = seq_range(temp_sc, n = 100)) %>%
  ungroup() %>%
  mutate(area_f = as.factor(area),
         temp = (temp_sc * sd(VBG$temp)) + mean(VBG$temp),
         temp_sq_sc = temp_sc^2) %>%
  add_epred_draws(m4l)
group_by: one grouping variable (area)
ungroup: no grouping variables
mutate: new variable 'area_f' (factor) with 10 unique values and 0% NA
        new variable 'temp_sq_sc' (double) with 999 unique values and 0% NA
        new variable 'temp' (double) with 999 unique values and 0% NA
#m4_kl <- bind_rows(m4k_pred, m4l_pred)

# K  
# Observed k vs. mean site temperature, with the posterior median line
# (.width = 0) and 95% credible ribbon from the conditional predictions.
# NOTE(review): the original passed size = 0.6 to ggplot(), where extra
# arguments are silently ignored — it is applied to geom_point() here as
# intended, with override.aes(size = 1) keeping the legend keys readable.
# The original also called guides() twice; the second call's color spec
# overrode the first, so the two are consolidated into one call.
pk <- ggplot(dat, aes(temp, k, color = factor(area, order$area))) +
  geom_point(size = 0.6) +
  scale_color_manual(values = colors, name = "Site") +
  scale_fill_manual(values = colors, name = "Site") +
  theme(axis.title.y = ggtext::element_markdown(),
        legend.position = "bottom",
        legend.direction = "horizontal") + 
  # Transparent 95% credible band...
  stat_lineribbon(data = m4k_pred,
                  aes(temp, y = .epred,
                      fill = factor(area, order$area)),
                  alpha = 0.1, .width = 0.95) +
  # ...and an opaque posterior median line on top.
  stat_lineribbon(data = m4k_pred,
                  aes(temp, y = .epred,
                      fill = factor(area, order$area)),
                  alpha = 1, .width = 0) +
  guides(fill = "none", color = guide_legend(nrow = 1, reverse = TRUE,
                                             title.position = "top", title.hjust = 0.5,
                                             override.aes = list(size = 1, fill = NA))) + 
  labs(x = "Mean site temperature [°C]", y = "*k*", color = "")

# Linf
# Observed Linf vs. mean site temperature, with the posterior median line
# (.width = 0) and 95% credible ribbon from the conditional predictions.
# NOTE(review): as for pk, size = 0.6 was passed to ggplot() where it is
# silently ignored; it is applied to geom_point() here as intended, with
# override.aes(size = 1) keeping the legend keys readable.
pl <- ggplot(dat, aes(temp, linf, color = factor(area, order$area))) +
  geom_point(size = 0.6) +
  scale_color_manual(values = colors, name = "Site") +
  scale_fill_manual(values = colors, name = "Site") +
  theme(legend.position = "bottom",
        legend.direction = "horizontal") +
  # Transparent 95% credible band...
  stat_lineribbon(data = m4l_pred,
                  aes(temp, y = .epred,
                      fill = factor(area, order$area)),
                  alpha = 0.1, .width = 0.95) +
  # ...and an opaque posterior median line on top.
  stat_lineribbon(data = m4l_pred,
                  aes(temp, y = .epred,
                      fill = factor(area, order$area)),
                  alpha = 1, .width = 0) +
  guides(fill = "none", color = guide_legend(nrow = 1, reverse = TRUE,
                                             title.position = "top", title.hjust = 0.5,
                                             override.aes = list(size = 1, fill = NA))) + 
  labs(x = "Mean site temperature [°C]", y = expression(paste(italic(L[infinity]), " [mm]")),
       color = "")

# Stack k (top) over Linf (bottom), collecting shared axes and a single
# bottom legend across both panels, then save.
pk / pl + 
  plot_layout(axis = "collect", guides = "collect") & 
  theme(legend.position = "bottom")

ggsave(paste0(home, "/figures/supp/k_linf_conditional.pdf" ), width = 17, height = 25, units = "cm")

Print global temperature slopes

# Global temperature slopes for k: the population-level temp_sc estimate is
# ~0 and temp_sq_sc slightly negative (see output below), i.e. flat overall.
summary(m4k)
 Family: student 
  Links: mu = identity; sigma = identity; nu = identity 
Formula: k ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f) 
   Data: dat (Number of observations: 306) 
  Draws: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
         total post-warmup draws = 8000

Group-Level Effects: 
~area_f (Number of levels: 10) 
                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)                 0.03      0.01     0.01     0.05 1.00     3097
sd(temp_sc)                   0.02      0.01     0.00     0.04 1.00     1812
sd(temp_sq_sc)                0.01      0.01     0.00     0.02 1.00     2558
cor(Intercept,temp_sc)        0.14      0.41    -0.67     0.86 1.00     5520
cor(Intercept,temp_sq_sc)     0.01      0.49    -0.86     0.85 1.00     6136
cor(temp_sc,temp_sq_sc)      -0.07      0.50    -0.89     0.86 1.00     5407
                          Tail_ESS
sd(Intercept)                 4088
sd(temp_sc)                   2862
sd(temp_sq_sc)                3626
cor(Intercept,temp_sc)        4955
cor(Intercept,temp_sq_sc)     4832
cor(temp_sc,temp_sq_sc)       5967

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept      0.20      0.01     0.18     0.22 1.00     2521     3550
temp_sc        0.00      0.01    -0.02     0.02 1.00     3561     3273
temp_sq_sc    -0.01      0.01    -0.02     0.00 1.00     3554     3433

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     0.04      0.00     0.04     0.04 1.00     7852     5735
nu       24.74     13.55     8.13    60.16 1.00     8558     6433

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
# Global temperature slopes for Linf: population-level credible intervals
# overlap zero; the area-level sd(temp_sc) is large, so variation is mostly
# between areas (see output below).
summary(m4l)
 Family: student 
  Links: mu = identity; sigma = identity; nu = identity 
Formula: linf ~ temp_sc + temp_sq_sc + (1 + temp_sc + temp_sq_sc | area_f) 
   Data: dat (Number of observations: 306) 
  Draws: 4 chains, each with iter = 4000; warmup = 2000; thin = 1;
         total post-warmup draws = 8000

Group-Level Effects: 
~area_f (Number of levels: 10) 
                          Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS
sd(Intercept)                29.25     13.07     9.16    60.43 1.00     2870
sd(temp_sc)                  52.76     18.25    24.40    95.43 1.00     3419
sd(temp_sq_sc)               14.58     10.27     0.65    39.15 1.00     2674
cor(Intercept,temp_sc)       -0.41      0.34    -0.92     0.32 1.00     3192
cor(Intercept,temp_sq_sc)    -0.10      0.48    -0.89     0.81 1.00     5364
cor(temp_sc,temp_sq_sc)       0.29      0.43    -0.65     0.92 1.00     7399
                          Tail_ESS
sd(Intercept)                 3292
sd(temp_sc)                   3666
sd(temp_sq_sc)                3629
cor(Intercept,temp_sc)        4256
cor(Intercept,temp_sq_sc)     4896
cor(temp_sc,temp_sq_sc)       5424

Population-Level Effects: 
           Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Intercept    335.60     12.17   313.32   361.76 1.00     3239     4081
temp_sc        0.34      4.84    -8.96     9.89 1.00    11563     5715
temp_sq_sc     5.11      4.58    -4.01    13.77 1.00     6537     4722

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma    48.52      4.56    39.91    57.82 1.00     6300     4478
nu        2.55      0.53     1.73     3.78 1.00     6539     4752

Draws were sampled using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).

Calculate VBGE fits for specific temperature curves. Since the global effects of temperature are flat, we have to do it with area-specific predictions. But which ones? Here I predict k and Linf for 2 temperatures: the area mean and the area mean +3 °C. I then use those to calculate reference and warm VBGE curves. There’s a tendency for an increase in size-at-age in warmer areas. We sort of see this already in the plots of Linf against temperature by area, where cool populations decline and warm populations increase their Linf.

# Area specific parameters, then calculate growth curves from those

# For the plots below I trim age to 12, which is approximate maximum... with 40, we are essentially just plotting Linf, which we already have better plots for
# (curves are computed over ages 0-40; the plots zoom to 0-12 with
# coord_cartesian(), which clips the view without discarding data)
age_bc <- 0:40 

# Predict k across areas
# Posterior summaries of k (median and 5th-95th percentiles, i.e. a 90%
# interval) at two temperatures per area: the area mean and the mean + 3°C.
m4k_pars <- dat %>%
  group_by(area_f) %>% 
  data_grid(temp = mean(temp)) %>% 
  ungroup() %>% 
  # Warm scenario: +3°C on the raw (unscaled) temperature scale.
  mutate(temp2 = temp + 3) %>% 
  # Stack the reference and warm scenarios into a single 'temp' column.
  pivot_longer(c("temp2", "temp"), values_to = "temp") %>%
  dplyr::select(-name) %>% 
  # Scale with the observed data's mean/sd so predictions are on the scale
  # the model was fitted on.
  mutate(temp_sc = (temp - mean(dat$temp)) / sd(dat$temp),
         temp_sq_sc = temp_sc*temp_sc) %>% 
  add_epred_draws(m4k) %>% 
  rename(k = .epred) %>% 
  group_by(temp, area_f) %>%
  summarise(k_median = quantile(k, prob = 0.5),
            k_lwr = quantile(k, prob = 0.05),
            k_upr = quantile(k, prob = 0.95))
group_by: one grouping variable (area_f)
ungroup: no grouping variables
mutate: new variable 'temp2' (double) with 10 unique values and 0% NA
pivot_longer: reorganized (temp2) into (name) [was 10x3, now 20x3]
mutate: new variable 'temp_sc' (double) with 20 unique values and 0% NA
        new variable 'temp_sq_sc' (double) with 20 unique values and 0% NA
rename: renamed one variable (k)
group_by: 2 grouping variables (temp, area_f)
summarise: now 20 rows and 5 columns, one group variable remaining (temp)
# Same two-temperature prediction grid as for k, but summarising Linf (m4l):
# median and 5th-95th percentiles per area and scenario.
m4l_pars <- dat %>%
  group_by(area_f) %>% 
  data_grid(temp = mean(temp)) %>% 
  ungroup() %>% 
  # Warm scenario: +3°C on the raw (unscaled) temperature scale.
  mutate(temp2 = temp + 3) %>% 
  pivot_longer(c("temp2", "temp"), values_to = "temp") %>%
  dplyr::select(-name) %>% 
  mutate(temp_sc = (temp - mean(dat$temp)) / sd(dat$temp),
         temp_sq_sc = temp_sc*temp_sc) %>% 
  add_epred_draws(m4l) %>% 
  rename(linf = .epred) %>% 
  group_by(temp, area_f) %>% 
  summarise(linf_median = quantile(linf, prob = 0.5),
            linf_lwr = quantile(linf, prob = 0.05),
            linf_upr = quantile(linf, prob = 0.95))
group_by: one grouping variable (area_f)
ungroup: no grouping variables
mutate: new variable 'temp2' (double) with 10 unique values and 0% NA
pivot_longer: reorganized (temp2) into (name) [was 10x3, now 20x3]
mutate: new variable 'temp_sc' (double) with 20 unique values and 0% NA
        new variable 'temp_sq_sc' (double) with 20 unique values and 0% NA
rename: renamed one variable (linf)
group_by: 2 grouping variables (temp, area_f)
summarise: now 20 rows and 5 columns, one group variable remaining (temp)
# Join the k and Linf summaries by area and temperature scenario (20 rows:
# 10 areas x 2 temperatures).
est <- left_join(m4k_pars, 
                 m4l_pars,
                 by = c("area_f", "temp"))
left_join: added 3 columns (linf_median, linf_lwr, linf_upr)
           > rows only in x    0
           > rows only in y  ( 0)
           > matched rows     20
           >                 ====
           > rows total       20
# Expand by age and calculate length-at-age
# Expand the joined parameter summaries over ages 0-40 and compute VBGE
# length-at-age: L(a) = Linf * (1 - exp(-k * a)).
# NOTE(review): the warm scenario was constructed above as temp2 = temp + 3,
# i.e. +3°C, but the original labels said "Mean + 2°C" and the final y-axis
# claimed "3 st.dev."; both are corrected to "+ 3°C" here to match the code.
est_sum <- est %>% 
  crossing(age_bc) %>% 
  # NOTE(review): pairing the independent 5th/95th percentiles of k and Linf
  # does not form a joint credible band for length-at-age (the parameters
  # are correlated) — treat these ribbons as approximate.
  mutate(length_mm = linf_median*(1-exp(-k_median*age_bc)),
         length_mm_lwr = linf_lwr*(1-exp(-k_lwr*age_bc)),
         length_mm_upr = linf_upr*(1-exp(-k_upr*age_bc))) %>% 
  group_by(area_f) %>% 
  # Within each area the lower temperature is the area mean, the other +3°C.
  mutate(temp_cat = ifelse(temp == min(temp), "Mean °C", "Mean + 3°C"))
mutate: new variable 'length_mm' (double) with 801 unique values and 0% NA
        new variable 'length_mm_lwr' (double) with 801 unique values and 0% NA
        new variable 'length_mm_upr' (double) with 801 unique values and 0% NA
group_by: one grouping variable (area_f)
mutate (grouped): new variable 'temp_cat' (character) with 2 unique values and 0% NA
pal <- rev(brewer.pal(n = 6, name = "Paired")[c(2, 6)])

# Predicted growth curves per area under the two temperature scenarios;
# x-axis zoomed to age 12 (coord_cartesian clips the view, not the data).
est_sum %>% 
  ggplot(aes(age_bc, length_mm, color = temp_cat)) +  
  geom_ribbon(aes(age_bc, ymin = length_mm_lwr, ymax = length_mm_upr, fill = temp_cat),
              alpha = 0.3, color = NA) +
  geom_line() +
  coord_cartesian(xlim = c(0, 12)) +
  scale_color_manual(values = pal, name = "Temperature") +
  scale_fill_manual(values = pal, name = "Temperature") +
  facet_wrap(~factor(area_f, rev(order$area)), ncol = 4) + 
  theme(legend.position = "bottom") +
  labs(x = "Age (years)", y = "Predicted length (mm)")

# Difference in size-at-age?
# Split median curves into warm (+3°C) and reference (area mean) sets.
warm <- est_sum %>% 
  filter(temp_cat == "Mean + 3°C") %>% 
  dplyr::select(area_f, length_mm, age_bc) %>%
  rename(length_mm_warm = length_mm)
filter (grouped): removed 410 rows (50%), 410 rows remaining
rename: renamed one variable (length_mm_warm)
ref <- est_sum %>% 
  filter(temp_cat == "Mean °C") %>% 
  dplyr::select(area_f, length_mm, age_bc) %>%
  rename(length_mm_ref = length_mm)
filter (grouped): removed 410 rows (50%), 410 rows remaining
rename: renamed one variable (length_mm_ref)
delta <- left_join(warm, ref, by = c("area_f", "age_bc")) %>% 
  mutate(diff = length_mm_warm - length_mm_ref)
left_join: added one column (length_mm_ref)
           > rows only in x     0
           > rows only in y  (  0)
           > matched rows     410
           >                 =====
           > rows total       410
mutate (grouped): new variable 'diff' (double) with 401 unique values and 0% NA
# Warming-induced change in length-at-age per area (+3°C vs. area mean).
ggplot(delta, aes(age_bc, diff, color = factor(area_f, order$area))) +
  geom_line() +
  coord_cartesian(xlim = c(0, 12)) +
  labs(x = "Age", color = "Area",
       y = "Difference in size-at-age with a 3°C increase in temperature (mm)") + 
  scale_color_manual(values = colors)